In [1]:
## 設定
verbose = False
check = False
### 言語の割合の均等化
balanced = True
### LDA 用
## トピック数
n_topics = 20 # 30は多過ぎる?
## doc, term の設定
doc_type = 'form' # 変更不可
doc_attr = 'spell' # spell か sound の選択
max_doc_size = 12
##
term_size = 'character'
term_type = 'skippy2gram'
## skippy n-gram の結合範囲
max_distance_val = round(max_doc_size * 0.8)
print(f"max_distance_val: {max_distance_val}")
## ngram を包括的にするかどうか
ngram_is_inclusive = True
### DTM 構築
## term の最低頻度
term_min_freq = 2
## 高頻度 term の濫用指標: 大きくし過ぎないように.0.05 は十分に大きい
term_abuse_threshold = 0.04
max_distance_val: 10
In [2]:
import sys, os, random, re, glob
import pandas as pd
import pprint as pp
from functools import reduce
In [3]:
## load data to process
from pathlib import Path
import pprint as pp
wd = Path(".")
dirs = [ x for x in wd.iterdir() if x.is_dir() and not x.match(r"plot*") ]
if verbose:
print(f"The following {len(dirs)} directories are potential targets:")
pp.pprint(dirs)
## list up files in target directory
wd = Path(".")
target_dir = "data-words" # can be changed
target_files = sorted(list(wd.glob(f"{target_dir}/*.csv")))
#
print(f"\n{target_dir} contains {len(target_files)} files to process")
pp.pprint(target_files)
data-words contains 20 files to process
[PosixPath('data-words/base-sound-English-r6e-originals.csv'),
PosixPath('data-words/base-sound-French-r0-opendic-s900.csv'),
PosixPath('data-words/base-sound-German-r1a-original.csv'),
PosixPath('data-words/base-spell-Arabic-r0-1k-mc.csv'),
PosixPath('data-words/base-spell-Chinese-r0-1k-mc.csv'),
PosixPath('data-words/base-spell-English-r6e-originals.csv'),
PosixPath('data-words/base-spell-Esperanto-r0-orginal.csv'),
PosixPath('data-words/base-spell-Finnish-r0-1k-mc.csv'),
PosixPath('data-words/base-spell-French-r0-1k-mc.csv'),
PosixPath('data-words/base-spell-German-r1a-originals.csv'),
PosixPath('data-words/base-spell-Greek-r0-1k-mc.csv'),
PosixPath('data-words/base-spell-Hungarian-r0-1k-mc.csv'),
PosixPath('data-words/base-spell-Icelandic-r0-original.csv'),
PosixPath('data-words/base-spell-Irish-r0-1k-mc.csv'),
PosixPath('data-words/base-spell-Italian-r0-1k-mc.csv'),
PosixPath('data-words/base-spell-Japanese-r0-1k-mc.csv'),
PosixPath('data-words/base-spell-Russian-r0-1k-mc.csv'),
PosixPath('data-words/base-spell-Spanish-r0-1k-mc.csv'),
PosixPath('data-words/base-spell-Swahili-r0-1k-mc.csv'),
PosixPath('data-words/base-spell-Turkish-r0-1k-mc.csv')]
In [4]:
import pandas as pd
## データ型の辞書
types = re.split(r",\s+", "spell, sound, freq")
type_settings = { t : 0 for t in types }
print(type_settings)
## 言語名の辞書
lang_name_text = "arabic, bengali, chinese, english, esperanto, finnish, french, \
greek, galician, german, hungarian, icelandic, irish, italian, japanese, russian, spanish, swahili, turkish"
langs = re.split(r",\s*", lang_name_text)
#langs = "english esperanto french german russian swahili".split(" ")
#langs = "english esperanto french german icelandic swahili".split(" ")
lang_settings = { lang : 0 for lang in langs }
print(f"{len(lang_settings.keys())} langs are available")
print(lang_settings)
## 辞書と統合
settings = { 'form': None, **type_settings, **lang_settings }
print(settings)
{'spell': 0, 'sound': 0, 'freq': 0}
19 langs are available
{'arabic': 0, 'bengali': 0, 'chinese': 0, 'english': 0, 'esperanto': 0, 'finnish': 0, 'french': 0, 'greek': 0, 'galician': 0, 'german': 0, 'hungarian': 0, 'icelandic': 0, 'irish': 0, 'italian': 0, 'japanese': 0, 'russian': 0, 'spanish': 0, 'swahili': 0, 'turkish': 0}
{'form': None, 'spell': 0, 'sound': 0, 'freq': 0, 'arabic': 0, 'bengali': 0, 'chinese': 0, 'english': 0, 'esperanto': 0, 'finnish': 0, 'french': 0, 'greek': 0, 'galician': 0, 'german': 0, 'hungarian': 0, 'icelandic': 0, 'irish': 0, 'italian': 0, 'japanese': 0, 'russian': 0, 'spanish': 0, 'swahili': 0, 'turkish': 0}
In [5]:
check = False
vars = list(settings.keys())
print(f"targe var names: {vars}")
d_parts = [ ]
for lang in langs:
local_settings = settings.copy()
if check:
print(f"processing: {lang}")
try:
for f in [ f for f in target_files if lang.capitalize() in str(f) ]:
print(f"reading: {f}")
# 言語名の指定
local_settings[lang] = 1
# 型名の指定
for type in vars:
if type in str(f):
local_settings[type] = 1
#
try:
d = pd.read_csv(f, encoding='utf-8', sep = ",", on_bad_lines = 'skip') # Crucially, ...= skip
df = pd.DataFrame(d, columns = vars)
for var in [ var for var in (types + langs) if var != 'freq' ]:
df[var] = local_settings[var]
d_parts.append(df)
except FileNotFoundError:
pass
except IndexError:
pass
#
if verbose:
d_parts
targe var names: ['form', 'spell', 'sound', 'freq', 'arabic', 'bengali', 'chinese', 'english', 'esperanto', 'finnish', 'french', 'greek', 'galician', 'german', 'hungarian', 'icelandic', 'irish', 'italian', 'japanese', 'russian', 'spanish', 'swahili', 'turkish'] reading: data-words/base-spell-Arabic-r0-1k-mc.csv reading: data-words/base-spell-Chinese-r0-1k-mc.csv reading: data-words/base-sound-English-r6e-originals.csv reading: data-words/base-spell-English-r6e-originals.csv reading: data-words/base-spell-Esperanto-r0-orginal.csv reading: data-words/base-spell-Finnish-r0-1k-mc.csv reading: data-words/base-sound-French-r0-opendic-s900.csv reading: data-words/base-spell-French-r0-1k-mc.csv reading: data-words/base-spell-Greek-r0-1k-mc.csv reading: data-words/base-sound-German-r1a-original.csv reading: data-words/base-spell-German-r1a-originals.csv reading: data-words/base-spell-Hungarian-r0-1k-mc.csv reading: data-words/base-spell-Icelandic-r0-original.csv reading: data-words/base-spell-Irish-r0-1k-mc.csv reading: data-words/base-spell-Italian-r0-1k-mc.csv reading: data-words/base-spell-Japanese-r0-1k-mc.csv reading: data-words/base-spell-Russian-r0-1k-mc.csv reading: data-words/base-spell-Spanish-r0-1k-mc.csv reading: data-words/base-spell-Swahili-r0-1k-mc.csv reading: data-words/base-spell-Turkish-r0-1k-mc.csv
In [6]:
## データ統合
raw_df = pd.concat(d_parts)
raw_df
Out[6]:
| form | spell | sound | freq | arabic | bengali | chinese | english | esperanto | finnish | ... | german | hungarian | icelandic | irish | italian | japanese | russian | spanish | swahili | turkish | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | كما | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| 1 | أنا | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| 2 | له | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| 3 | أن | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| 4 | هو | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 994 | burun | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
| 995 | çoğul | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
| 996 | öfke | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
| 997 | iddia | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
| 998 | kıta | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25217 rows × 23 columns
In [7]:
## 文字数の列を追加
raw_df['size'] = [ len(x) for x in raw_df[doc_type] ]
raw_df
Out[7]:
| form | spell | sound | freq | arabic | bengali | chinese | english | esperanto | finnish | ... | hungarian | icelandic | irish | italian | japanese | russian | spanish | swahili | turkish | size | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | كما | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
| 1 | أنا | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
| 2 | له | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
| 3 | أن | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
| 4 | هو | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 994 | burun | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 |
| 995 | çoğul | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 |
| 996 | öfke | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 4 |
| 997 | iddia | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 |
| 998 | kıta | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 4 |
25217 rows × 24 columns
In [8]:
## 言語名= language の列を追加
check = False
language_vals = [ ]
for i, row in raw_df.iterrows():
if check:
print(row)
for j, lang in enumerate(langs):
if check:
print(f"{i}: {lang}")
if row[lang] == 1:
language_vals.append(lang)
if verbose:
print(language_vals)
len(language_vals)
#
raw_df['language'] = language_vals
raw_df
Out[8]:
| form | spell | sound | freq | arabic | bengali | chinese | english | esperanto | finnish | ... | icelandic | irish | italian | japanese | russian | spanish | swahili | turkish | size | language | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | كما | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | arabic |
| 1 | أنا | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | arabic |
| 2 | له | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | arabic |
| 3 | أن | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | arabic |
| 4 | هو | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | arabic |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 994 | burun | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 | turkish |
| 995 | çoğul | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 | turkish |
| 996 | öfke | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 4 | turkish |
| 997 | iddia | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 | turkish |
| 998 | kıta | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 4 | turkish |
25217 rows × 25 columns
In [9]:
## 言語の選別
select_languages = True
#selected_langs = re.split(r",\s*", "english, french, german, russian, swahili")
selected_langs = re.split(r",\s*",
"arabic, bengali, chinese, english, french, german, \
greek, hungarian, russian, japanese, turkish")
print(f"selected languages: {selected_langs}")
if select_languages:
df_new = [ ]
for lang in selected_langs:
df_new.append(raw_df[raw_df[lang] == 1])
raw_df = pd.concat(df_new)
#
raw_df
selected languages: ['arabic', 'bengali', 'chinese', 'english', 'french', 'german', 'greek', 'hungarian', 'russian', 'japanese', 'turkish']
Out[9]:
| form | spell | sound | freq | arabic | bengali | chinese | english | esperanto | finnish | ... | icelandic | irish | italian | japanese | russian | spanish | swahili | turkish | size | language | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | كما | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | arabic |
| 1 | أنا | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | arabic |
| 2 | له | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | arabic |
| 3 | أن | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | arabic |
| 4 | هو | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | arabic |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 994 | burun | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 | turkish |
| 995 | çoğul | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 | turkish |
| 996 | öfke | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 4 | turkish |
| 997 | iddia | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 | turkish |
| 998 | kıta | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 4 | turkish |
18963 rows × 25 columns
In [10]:
## 文字数の分布
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.hist(raw_df['size'], bins = 40)
ax.set_xlabel('length of doc')
ax.set_ylabel('freq')
plt.title(f"Length distribution for docs")
fig.show()
/var/folders/s2/lk8hdt6j10j0xyycw1lbjsm40000gn/T/ipykernel_71730/1088473461.py:12: UserWarning: FigureCanvasAgg is non-interactive, and thus cannot be shown fig.show()
In [11]:
## 長さで濾過
print(f"max doc size: {max_doc_size}")
original_size = len(raw_df)
raw_df = raw_df[raw_df['size'] < max_doc_size]
filtered_size = len(raw_df)
print(f"{original_size - filtered_size} cases removed")
max doc size: 12 414 cases removed
In [12]:
## 結果の検査 1
for lang in langs:
print(raw_df[lang].value_counts())
arabic 0 17554 1 995 Name: count, dtype: int64 bengali 0 18549 Name: count, dtype: int64 chinese 0 17549 1 1000 Name: count, dtype: int64 english 0 10300 1 8249 Name: count, dtype: int64 esperanto 0 18549 Name: count, dtype: int64 finnish 0 18549 Name: count, dtype: int64 french 0 16733 1 1816 Name: count, dtype: int64 greek 0 17575 1 974 Name: count, dtype: int64 galician 0 18549 Name: count, dtype: int64 german 0 16984 1 1565 Name: count, dtype: int64 hungarian 0 17558 1 991 Name: count, dtype: int64 icelandic 0 18549 Name: count, dtype: int64 irish 0 18549 Name: count, dtype: int64 italian 0 18549 Name: count, dtype: int64 japanese 0 17549 1 1000 Name: count, dtype: int64 russian 0 17576 1 973 Name: count, dtype: int64 spanish 0 18549 Name: count, dtype: int64 swahili 0 18549 Name: count, dtype: int64 turkish 0 17563 1 986 Name: count, dtype: int64
In [13]:
## 結果の検査 2
for type in types:
print(raw_df[type].value_counts())
spell 1 12829 0 5720 Name: count, dtype: int64 sound 1 11630 0 6919 Name: count, dtype: int64 freq 1.0 17569 1 966 1 не 1 1 то время как 1 1 северу 1 1 него 1 1 будет 1 1 образом 1 1 мышь 1 Name: count, dtype: int64
In [14]:
## 統合: 割合補正を適用
eng_reduct_factor = 0.2
if balanced:
eng_df = raw_df[raw_df['english'] == 1]
non_eng_df = raw_df[raw_df['english'] == 0]
eng_reduced_df = eng_df.sample(round(len(eng_df) * eng_reduct_factor))
raw_df = pd.concat([eng_reduced_df, non_eng_df])
raw_df
Out[14]:
| form | spell | sound | freq | arabic | bengali | chinese | english | esperanto | finnish | ... | icelandic | irish | italian | japanese | russian | spanish | swahili | turkish | size | language | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 42 | ækʃən | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | english |
| 1241 | kəmyunətɪ | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | english |
| 796 | fɑks | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | english |
| 883 | currently | 1 | 1 | 1.0 | 0 | 0 | 0 | 1 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | english |
| 344 | bɹid | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | english |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 994 | burun | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 | turkish |
| 995 | çoğul | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 | turkish |
| 996 | öfke | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 4 | turkish |
| 997 | iddia | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 | turkish |
| 998 | kıta | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 4 | turkish |
11950 rows × 25 columns
In [15]:
## データの指定
raw_df = raw_df[raw_df[doc_attr] == 1]
print(f"doc_attr: {doc_attr}")
raw_df
doc_attr: spell
Out[15]:
| form | spell | sound | freq | arabic | bengali | chinese | english | esperanto | finnish | ... | icelandic | irish | italian | japanese | russian | spanish | swahili | turkish | size | language | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 883 | currently | 1 | 1 | 1.0 | 0 | 0 | 0 | 1 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | english |
| 3933 | trees | 1 | 1 | 1.0 | 0 | 0 | 0 | 1 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | english |
| 1997 | iron | 1 | 1 | 1.0 | 0 | 0 | 0 | 1 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | english |
| 483 | burn | 1 | 1 | 1.0 | 0 | 0 | 0 | 1 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | english |
| 1605 | girls | 1 | 1 | 1.0 | 0 | 0 | 0 | 1 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | english |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 994 | burun | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 | turkish |
| 995 | çoğul | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 | turkish |
| 996 | öfke | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 4 | turkish |
| 997 | iddia | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 5 | turkish |
| 998 | kıta | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 4 | turkish |
9502 rows × 25 columns
In [16]:
## 結果の検査 3
for lang in langs:
print(raw_df[lang].value_counts())
arabic 0 8507 1 995 Name: count, dtype: int64 bengali 0 9502 Name: count, dtype: int64 chinese 0 8502 1 1000 Name: count, dtype: int64 english 0 8681 1 821 Name: count, dtype: int64 esperanto 0 9502 Name: count, dtype: int64 finnish 0 9502 Name: count, dtype: int64 french 0 8517 1 985 Name: count, dtype: int64 greek 0 8528 1 974 Name: count, dtype: int64 galician 0 9502 Name: count, dtype: int64 german 0 8725 1 777 Name: count, dtype: int64 hungarian 0 8511 1 991 Name: count, dtype: int64 icelandic 0 9502 Name: count, dtype: int64 irish 0 9502 Name: count, dtype: int64 italian 0 9502 Name: count, dtype: int64 japanese 0 8502 1 1000 Name: count, dtype: int64 russian 0 8529 1 973 Name: count, dtype: int64 spanish 0 9502 Name: count, dtype: int64 swahili 0 9502 Name: count, dtype: int64 turkish 0 8516 1 986 Name: count, dtype: int64
解析¶
In [17]:
## 順序のランダマイズし,基本データを決定
import sklearn.utils
df = sklearn.utils.shuffle(raw_df)
In [18]:
## ngram の追加
import sys
sys.path.append('..')
import re
import ngrams
import importlib
importlib.reload(ngrams)
import ngrams_skippy
bases = df[doc_type]
## 1gram 列の追加
#sep = r""
#unigrams = [ list(filter(lambda x: len(x) > 0, y)) for y in [ re.split(sep, z) for z in bases ] ]
unigrams = ngrams.gen_unigrams(bases, sep = r"", check = False)
if verbose:
random.sample(unigrams, 5)
#
df['1gram'] = unigrams
#df.loc[:,'1gram'] = unigrams
df
Out[18]:
| form | spell | sound | freq | arabic | bengali | chinese | english | esperanto | finnish | ... | irish | italian | japanese | russian | spanish | swahili | turkish | size | language | 1gram | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 992 | واسع | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | arabic | [و, ا, س, ع] |
| 70 | itt | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | hungarian | [i, t, t] |
| 117 | est_venu | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | french | [e, s, t, _, v, e, n, u] |
| 178 | fünf | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | german | [f, ü, n, f] |
| 68 | さらに | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 | japanese | [さ, ら, に] |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 751 | опасность | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 9 | russian | [о, п, а, с, н, о, с, т, ь] |
| 486 | مركز | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | arabic | [م, ر, ك, ز] |
| 842 | 死んだ | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 | japanese | [死, ん, だ] |
| 67 | ακόμη | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | greek | [α, κ, ό, μ, η] |
| 469 | 所以 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | chinese | [所, 以] |
9502 rows × 26 columns
In [19]:
## 2gram列の追加
bigrams = ngrams.gen_bigrams(bases, sep = r"", check = False)
## 包括的 2gram の作成
if ngram_is_inclusive:
bigrams = [ [*b, *u] for b, u in zip(bigrams, unigrams) ]
if verbose:
print(random.sample(bigrams, 3))
In [20]:
df['2gram'] = bigrams
if verbose:
df
In [21]:
## 3gram列の追加
trigrams = ngrams.gen_trigrams(bases, sep = r"", check = False)
## 包括的 3gram の作成
if ngram_is_inclusive:
trigrams = [ [ *t, *b ] for t, b in zip(trigrams, bigrams) ]
if verbose:
print(random.sample(trigrams, 3))
In [22]:
df['3gram'] = trigrams
if verbose:
df
In [23]:
## skippy 2grams の生成
import sys
sys.path.append("..") # library path に一つ上の階層を追加
import ngrams_skippy
skippy_2grams = [ ngrams_skippy.generate_skippy_bigrams(x,
missing_mark = '…',
max_distance = max_distance_val, check = False)
for x in df['1gram'] ]
## 包括的 skippy 2-grams の生成
if ngram_is_inclusive:
for i, b2 in enumerate(skippy_2grams):
b2.extend(unigrams[i])
#
if verbose:
random.sample(skippy_2grams, 3)
In [24]:
## skippy 2gram 列の追加
df['skippy2gram'] = skippy_2grams
df
Out[24]:
| form | spell | sound | freq | arabic | bengali | chinese | english | esperanto | finnish | ... | russian | spanish | swahili | turkish | size | language | 1gram | 2gram | 3gram | skippy2gram | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 992 | واسع | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 4 | arabic | [و, ا, س, ع] | [وا, اس, سع, و, ا, س, ع] | [واس, اسع, وا, اس, سع, و, ا, س, ع] | [وا, و…س, و…ع, اس, ا…ع, سع, و, ا, س, ع] |
| 70 | itt | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 3 | hungarian | [i, t, t] | [it, tt, i, t, t] | [itt, it, tt, i, t, t] | [it, i…t, tt, i, t, t] |
| 117 | est_venu | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 8 | french | [e, s, t, _, v, e, n, u] | [es, st, t_, _v, ve, en, nu, e, s, t, _, v, e,... | [est, st_, t_v, _ve, ven, enu, es, st, t_, _v,... | [es, e…t, e…_, e…v, e…e, e…n, e…u, st, s…_, s…... |
| 178 | fünf | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 4 | german | [f, ü, n, f] | [fü, ün, nf, f, ü, n, f] | [fün, ünf, fü, ün, nf, f, ü, n, f] | [fü, f…n, f…f, ün, ü…f, nf, f, ü, n, f] |
| 68 | さらに | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 3 | japanese | [さ, ら, に] | [さら, らに, さ, ら, に] | [さらに, さら, らに, さ, ら, に] | [さら, さ…に, らに, さ, ら, に] |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 751 | опасность | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 1 | 0 | 0 | 0 | 9 | russian | [о, п, а, с, н, о, с, т, ь] | [оп, па, ас, сн, но, ос, ст, ть, о, п, а, с, н... | [опа, пас, асн, сно, нос, ост, сть, оп, па, ас... | [оп, о…а, о…с, о…н, о…о, о…т, о…ь, па, п…с, п…... |
| 486 | مركز | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 4 | arabic | [م, ر, ك, ز] | [مر, رك, كز, م, ر, ك, ز] | [مرك, ركز, مر, رك, كز, م, ر, ك, ز] | [مر, م…ك, م…ز, رك, ر…ز, كز, م, ر, ك, ز] |
| 842 | 死んだ | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 3 | japanese | [死, ん, だ] | [死ん, んだ, 死, ん, だ] | [死んだ, 死ん, んだ, 死, ん, だ] | [死ん, 死…だ, んだ, 死, ん, だ] |
| 67 | ακόμη | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 5 | greek | [α, κ, ό, μ, η] | [ακ, κό, όμ, μη, α, κ, ό, μ, η] | [ακό, κόμ, όμη, ακ, κό, όμ, μη, α, κ, ό, μ, η] | [ακ, α…ό, α…μ, α…η, κό, κ…μ, κ…η, όμ, ό…η, μη,... |
| 469 | 所以 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 0 | 2 | chinese | [所, 以] | [所以, 所, 以] | [所, 以, 所以, 所, 以] | [所以, 所, 以] |
9502 rows × 29 columns
In [25]:
## skippy 3grams の生成
#import sys
#sys.path.append("..") # library path に一つ上の階層を追加
import ngrams_skippy
skippy_3grams = [ ngrams_skippy.generate_skippy_trigrams(x,
missing_mark = '…',
max_distance = max_distance_val, check = False)
for x in df['1gram'] ]
## 包括的 skippy 3-grams の生成
if ngram_is_inclusive:
for i, t2 in enumerate(skippy_3grams):
t2.extend(skippy_2grams[i])
#
if verbose:
random.sample(skippy_3grams, 3)
In [26]:
## skippy 3gram 列の追加
df['skippy3gram'] = skippy_3grams
df
Out[26]:
| form | spell | sound | freq | arabic | bengali | chinese | english | esperanto | finnish | ... | spanish | swahili | turkish | size | language | 1gram | 2gram | 3gram | skippy2gram | skippy3gram | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 992 | واسع | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 4 | arabic | [و, ا, س, ع] | [وا, اس, سع, و, ا, س, ع] | [واس, اسع, وا, اس, سع, و, ا, س, ع] | [وا, و…س, و…ع, اس, ا…ع, سع, و, ا, س, ع] | [واس, وا…ع, و…سع, اسع, وا, و…س, و…ع, اس, ا…ع, ... |
| 70 | itt | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 3 | hungarian | [i, t, t] | [it, tt, i, t, t] | [itt, it, tt, i, t, t] | [it, i…t, tt, i, t, t] | [itt, it, i…t, tt, i, t, t] |
| 117 | est_venu | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 8 | french | [e, s, t, _, v, e, n, u] | [es, st, t_, _v, ve, en, nu, e, s, t, _, v, e,... | [est, st_, t_v, _ve, ven, enu, es, st, t_, _v,... | [es, e…t, e…_, e…v, e…e, e…n, e…u, st, s…_, s…... | [est, es…_, es…v, es…e, es…n, es…u, e…t_, e…t…... |
| 178 | fünf | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 4 | german | [f, ü, n, f] | [fü, ün, nf, f, ü, n, f] | [fün, ünf, fü, ün, nf, f, ü, n, f] | [fü, f…n, f…f, ün, ü…f, nf, f, ü, n, f] | [fün, fü…f, f…nf, ünf, fü, f…n, f…f, ün, ü…f, ... |
| 68 | さらに | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 3 | japanese | [さ, ら, に] | [さら, らに, さ, ら, に] | [さらに, さら, らに, さ, ら, に] | [さら, さ…に, らに, さ, ら, に] | [さらに, さら, さ…に, らに, さ, ら, に] |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 751 | опасность | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 9 | russian | [о, п, а, с, н, о, с, т, ь] | [оп, па, ас, сн, но, ос, ст, ть, о, п, а, с, н... | [опа, пас, асн, сно, нос, ост, сть, оп, па, ас... | [оп, о…а, о…с, о…н, о…о, о…т, о…ь, па, п…с, п…... | [опа, оп…с, оп…н, оп…о, оп…т, оп…ь, о…ас, о…а…... |
| 486 | مركز | 1 | 0 | 1.0 | 1 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 4 | arabic | [م, ر, ك, ز] | [مر, رك, كز, م, ر, ك, ز] | [مرك, ركز, مر, رك, كز, م, ر, ك, ز] | [مر, م…ك, م…ز, رك, ر…ز, كز, م, ر, ك, ز] | [مرك, مر…ز, م…كز, ركز, مر, م…ك, م…ز, رك, ر…ز, ... |
| 842 | 死んだ | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 3 | japanese | [死, ん, だ] | [死ん, んだ, 死, ん, だ] | [死んだ, 死ん, んだ, 死, ん, だ] | [死ん, 死…だ, んだ, 死, ん, だ] | [死んだ, 死ん, 死…だ, んだ, 死, ん, だ] |
| 67 | ακόμη | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 5 | greek | [α, κ, ό, μ, η] | [ακ, κό, όμ, μη, α, κ, ό, μ, η] | [ακό, κόμ, όμη, ακ, κό, όμ, μη, α, κ, ό, μ, η] | [ακ, α…ό, α…μ, α…η, κό, κ…μ, κ…η, όμ, ό…η, μη,... | [ακό, ακ…μ, ακ…η, α…όμ, α…ό…η, α…μη, κόμ, κό…η... |
| 469 | 所以 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | ... | 0 | 0 | 0 | 2 | chinese | [所, 以] | [所以, 所, 以] | [所, 以, 所以, 所, 以] | [所以, 所, 以] | [所以, 所以, 所, 以] |
9502 rows × 30 columns
In [27]:
## LDA 構築の基になる document-term matrix (dtm) を構築
from gensim.corpora.dictionary import Dictionary
bots = df[term_type]
diction = Dictionary(bots)
## 結果の確認
print(diction)
Dictionary<10080 unique tokens: ['ا', 'اس', 'ا…ع', 'س', 'سع']...>
In [28]:
## diction の濾過
import copy
diction_copy = copy.deepcopy(diction)
## filter適用: 実は諸刃の刃で,token数が少ない時には適用しない方が良い
print(f"min freq filter: {term_min_freq}")
print(f"abuse filter: {term_abuse_threshold}")
apply_filter = True
if apply_filter:
diction_copy.filter_extremes(no_below = term_min_freq, no_above = term_abuse_threshold)
## check
print(diction_copy)
min freq filter: 2 abuse filter: 0.04 Dictionary<6189 unique tokens: ['اس', 'ا…ع', 'س', 'سع', 'ع']...>
In [29]:
## Corpus (gensim の用語では corpus) の構築
corpus = [ diction.doc2bow(bot) for bot in bots ]
## check
check = True
if verbose:
sample_n = 5
print(random.sample(corpus, sample_n))
#
print(f"Number of documents: {len(corpus)}")
Number of documents: 9502
In [30]:
## LDA モデルの構築
from gensim.models import LdaModel
#from tqdm import tqdm
## LDAモデル
print(f"Building LDA model with n_topics: {n_topics}")
lda = LdaModel(corpus, id2word = diction, num_topics = n_topics, alpha = 0.01)
#
print(lda) # print(..)しないと中身が見れない
Building LDA model with n_topics: 20 LdaModel<num_terms=10080, num_topics=20, decay=0.5, chunksize=2000>
In [31]:
%%capture --no-display
## LDA のtopic ごとに,関連度の高い term を表示
import pandas as pd
n_terms = 40 # topic ごとに表示する term 数の指定
topic_dfs = [ ]
for topic in range(n_topics):
terms = [ ]
for i, prob in lda.get_topic_terms(topic, topn = n_terms):
terms.append(diction.id2token[ int(i) ])
#
topic_dfs.append(pd.DataFrame([terms], index = [ f'topic {topic+1}' ]))
#
topic_term_df = pd.concat(topic_dfs)
## Table で表示
topic_term_df.T
Out[31]:
| topic 1 | topic 2 | topic 3 | topic 4 | topic 5 | topic 6 | topic 7 | topic 8 | topic 9 | topic 10 | topic 11 | topic 12 | topic 13 | topic 14 | topic 15 | topic 16 | topic 17 | topic 18 | topic 19 | topic 20 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | ي | р | о | ü | s | о | ب | b | e | ا | a | ν | e | α | e | a | t | ο | i | a |
| 1 | د | а | о…о | ク | e | а | ل | e | ç | ل | l | l | i | ε | r | h | a | ν | e | k |
| 2 | ر | т | и | ü…e | n | с | る | u | f | م | z | e | o | ρ | t | r | m | υ | n | y |
| 3 | ة | е | v | ー | a | е | α | r | k | ال | s | κ | r | τ | e…e | n | a…t | λ | m | u |
| 4 | ف | м | н | ッ | e…s | т | ا | t | m | ر | á | τ…ί | n | σ | er | l | s | ά | s | o |
| 5 | ي…ة | з | т | ド | r…s | л | す | b…e | e…m | ة | v | ί | l | ι | r…e | c | at | ου | t | m |
| 6 | ت | ра | ор | ü…l | r | н | κ | l | me | و | sz | ντ | d | ο | n | p | t…a | κ | c | d |
| 7 | ق | ет | то | く | a…s | ь | ا…ع | be | ek | ن | t | ν…τ | c | π | e…t | g | n | ο…ν | s…e | l |
| 8 | ع | i | и…о | プ | o | к | ع | b…n | i | ت | k | ι | i…e | μ | en | a…a | ta | ό…ο | i…e | r |
| 9 | ر…ة | р…т | л…о | щ | g | п | ا…و | u…e | g | س | al | el | u | η | e…r | e | t…t | ε | s…i | ak |
| 10 | ية | м…т | o | üz | se | д | ط | e…e | e…h | ح | e | κ…ι | o…e | ί | g | ar | е | π…υ | m…e | o…a |
| 11 | mu | и…а | но | yü | es | и | κ…α | g | ht | ي | v…l | ー | t | ς | a…e | an | m…t | ε…ο | s…n | a…a |
| 12 | и…л | ν | т…н | b…ü | y | ть | ا…ب | b…t | h | ق | va | e…l | f | ό | re | ı | n…t | άν | m…n | ya |
| 13 | ح | р…а | р | д…н | p…s | о…а | ρό | b…r | i…h | ع | o | σ…τ | s | ά | e…n | ch | k | μο | أ | a…k |
| 14 | د…ة | та | т…е | з | s…s | а…ь | ل…ب | er | ç…k | ا…ا | n | ν…ι | l…e | έ | te | h…e | и | π…ά | ب | m…k |
| 15 | ج | ер | о…н | е | o…s | о…т | ال | n | e…k | ا…ة | d | k | on | δ | r…r | b | h | ο…ά | in | o…k |
| 16 | ظ | ц | ок | τ…υ | k | р | μ…ι | t…e | πρ | ل…ا | s…l | ρ…ν | o…r | λ | a | ha | o | α…ά | s…m | ka |
| 17 | ί…ω | за | в…о | д…й | ss | о…ь | ρ…μ | ö | i…m | ا…ر | a…a | κ…ν | n…e | γ | p | a…n | c | π | i…n | k…a |
| 18 | пл | в…р | т…о | ι…έ | s…e | у | κ…ά | m | ά…ς | ك | v…s | ι…ο | p | α…α | t…r | ü | a…a | μ…λ | en | ma |
| 19 | ım | ре | no | ο…δ | i | ы | ف | l…e | e…e | ج | l…s | ν…ε | in | ω | é | m | e | λ…ο | i…i | n |
In [32]:
%%capture --no-display
## pyLDAvis を使った結果 LDA の可視化: 階層クラスタリングより詳しい
import pyLDAvis
#installed_version = sys.version
installed_version = pyLDAvis.__version__
print(f"installed_version: {installed_version}")
if float(installed_version[:3]) > 3.1:
import pyLDAvis.gensim_models as gensimvis
else:
import pyLDAvis.gensim as gensimvis
#
pyLDAvis.enable_notebook()
#
lda_used = lda
corpus_used = corpus
diction_used = diction
## 実行パラメター
use_tSNE = False
if use_tSNE:
vis = gensimvis.prepare(lda_used, corpus_used, diction_used, mds = 'tsne',
n_jobs = 1, sort_topics = False)
else:
vis = gensimvis.prepare(lda_used, corpus_used, diction_used,
n_jobs = 1, sort_topics = False)
#
pyLDAvis.display(vis)
## topic を表わす円の重なりが多いならn_topics が多過ぎる可能性がある.
## ただし2Dで重なっていても,3Dなら重なっていない可能性もある
Out[32]:
In [33]:
## LDA がD に対して生成した topics の弁別性を確認
## 得られたtopics を確認
topic_dist = lda.get_topics()
if verbose:
topic_dist
In [34]:
## 検査 1: topic ごとに分布の和を取る
print(topic_dist.sum(axis = 1))
[0.9999999 0.99999994 1. 0.99999994 1. 1. 0.9999999 1. 1. 1. 1. 1. 0.9999999 0.99999994 0.99999976 0.99999994 1. 1. 0.99999994 1.0000001 ]
In [35]:
## 検査 2: 総和を求める: n_topics にほぼ等しいなら正常
print(topic_dist.sum())
20.000002
In [36]:
## term エンコード値の分布を確認
import matplotlib.pyplot as plt
plt.figure(figsize = (4,5))
sampling_rate = 0.3
df_size = len(topic_dist)
sample_n = round(df_size * sampling_rate)
topic_sampled = random.sample(list(topic_dist), sample_n)
T = sorted([ sorted(x, reverse = True) for x in topic_sampled ])
plt.plot(T, range(len(T)))
plt.title("Distribution of sorted values ({sample_n} samples) for topic/term encoding")
plt.show()
In [37]:
## tSNE を使った topics のグループ化 (3D)
from sklearn.manifold import TSNE
import numpy as np
## tSNE のパラメターを設定
## n_components は射影先の空間の次元: n_components = 3 なら3次元空間に射影
## perplexity は結合の強さを表わす指数で,値に拠って結果が代わるので,色々な値を試すと良い
#perplexity_val = 10 # 大き過ぎると良くない
top_perplexity_reduct_rate = 0.3
perplexity_val = round(len(topic_dist) * top_perplexity_reduct_rate)
topic_tSNE_3d = TSNE(n_components = 3, random_state = 0, perplexity = perplexity_val, n_iter = 1000)
## データに適用
top_tSNE_3d_fitted = topic_tSNE_3d.fit_transform(np.array(topic_dist))
In [38]:
## Plotlyを使って tSNE の結果の可視化 (3D)
#import plotly.express as pex
import plotly.graph_objects as go
import numpy as np
top_tSNE = top_tSNE_3d_fitted
fig = go.Figure(data = [go.Scatter3d(x = top_tSNE[:,0], y = top_tSNE[:,1], z = top_tSNE[:,2],
mode = 'markers')])
## 3D 散布図にラベルを追加する処理は未実装
title_val = f"3D tSNE view for LDA (#topics: {n_topics}, doc: {doc_type}, term: {term_type})"
fig.update_layout(autosize = False,
width = 600, height = 600, title = title_val)
fig.show()
In [39]:
## 構築した LDA モデルを使って文(書)を分類する
## .get_document_topics(..) は minimu_probability = 0としないと
## topic の値が小さい場合に値を返さないので,
## パラメター
ntopics = n_topics # LDA の構築の最に指定した値を使う
check = False
encoding = [ ]
for i, row in df.iterrows():
if check:
print(f"row: {row}")
doc = row[doc_type]
bot = row[term_type]
## get_document_topics(..) では minimu_probability = 0 としないと
## 値が十分に大きな topics に関してだけ値が取れる
enc = lda.get_document_topics(diction.doc2bow(bot), minimum_probability = 0)
if check:
print(f"enc: {enc}")
encoding.append(enc)
#
len(encoding)
Out[39]:
9502
In [40]:
## enc 列の追加
#df['enc'] = np.array(encoding) # This flattens arrays
#df['enc'] = list(encoding) # ineffective
df['enc'] = [ list(map(lambda x: x[1], y)) for y in encoding ]
if verbose:
df['enc']
In [41]:
## エンコーディングのstd の分布を見る
from scipy.stats import tstd
from matplotlib import pyplot as plt
plt.figure(figsize = (6,4))
std_data = [ tstd(x) for x in df['enc'] ]
plt.hist(std_data)
plt.title("Distribution of standard deviations")
plt.show()
In [42]:
## doc のエンコーディング
## 一様分布の事例を除外
from scipy.stats import tstd # standard deviation の計算用
print(f"{len(df)} instances before filtering")
check = False
doc_enc = df['enc']
max_std = max([ tstd(x) for x in doc_enc])
if check: print(f"std max: {max_std}")
min_std = min([ tstd(x) for x in doc_enc])
if check: print(f"std min: {min_std}")
first_min_std = list(sorted(set([ tstd(x) for x in doc_enc])))[-0]
print(f"std 1st min: {first_min_std}")
second_min_std = list(sorted(set([ tstd(x) for x in doc_enc])))[-1]
print(f"std 2nd min: {second_min_std}")
9502 instances before filtering std 1st min: 0.0 std 2nd min: 0.22286375423919064
In [43]:
## df_filtered の定義
## 閾値は2番目に小さい値より小さく最小値よりは大きな値であるべき
std_threshold = second_min_std / 4 # 穏健な値を得るために4で割った
print(f"std_threshold: {std_threshold}")
## Rっぽい次のコードは通らない
#df_filtered = df[ df['encoding'] > std_threshold ]
## 通るのは次のコード: Creating a list of True/False and apply it to DataFrame
std_tested = [ False if tstd(x) < std_threshold else True for x in df['enc'] ]
df_filtered = df[ std_tested ]
#
print(f"{len(df_filtered)} instances after filtering ({len(df) - len(df_filtered)} instances removed)")
std_threshold: 0.05571593855979766 8846 instances after filtering (656 instances removed)
In [44]:
## doc エンコード値の分布を確認
sample_n = 50
E = sorted([ sorted(x, reverse = True) for x in df_filtered['enc'].sample(sample_n) ])
plt.figure(figsize = (5,5))
plt.plot(E, range(len(E)))
plt.title(f"Distribution of sorted encoding values for sampled {sample_n} docs")
plt.show()
In [45]:
len(df_filtered['language'])
Out[45]:
8846
In [46]:
df_filtered['language'].value_counts
Out[46]:
<bound method IndexOpsMixin.value_counts of 992 arabic
70 hungarian
117 french
178 german
68 japanese
...
751 russian
486 arabic
842 japanese
67 greek
469 chinese
Name: language, Length: 8846, dtype: object>
In [47]:
## tSNE 用の事例サンプリング = tSNE_df の定義
tSNE_sampling = True
tSNE_sampling_rate = 0.33
if tSNE_sampling:
tSNE_df_original = df_filtered.copy()
sample_n = round(len(tSNE_df_original) * tSNE_sampling_rate)
tSNE_df = tSNE_df_original.sample(sample_n)
print(f"tSNE_df has {len(tSNE_df)} rows after sampling")
else:
tSNE_df = df_filtered
tSNE_df has 2919 rows after sampling
In [48]:
tSNE_df.columns
Out[48]:
Index(['form', 'spell', 'sound', 'freq', 'arabic', 'bengali', 'chinese',
'english', 'esperanto', 'finnish', 'french', 'greek', 'galician',
'german', 'hungarian', 'icelandic', 'irish', 'italian', 'japanese',
'russian', 'spanish', 'swahili', 'turkish', 'size', 'language', '1gram',
'2gram', '3gram', 'skippy2gram', 'skippy3gram', 'enc'],
dtype='object')
In [49]:
tSNE_df['language'].value_counts
Out[49]:
<bound method IndexOpsMixin.value_counts of 735 german
219 greek
309 hungarian
283 turkish
636 russian
...
67 hungarian
2236 english
523 greek
322 russian
447 english
Name: language, Length: 2919, dtype: object>
In [50]:
## tSNE の結果の可視化: Plotly を使った 3D 描画
import numpy as np
from sklearn.manifold import TSNE as tSNE
import plotly.express as pex
import plotly.graph_objects as go
import matplotlib.pyplot as plt
## tSNE のパラメターを設定
perplexity_max_val = round(len(tSNE_df)/4)
for perplexity_val in range(5, perplexity_max_val, 60):
## tSNE 事例の生成
tSNE_3d_varied = tSNE(n_components = 3, random_state = 0, perplexity = perplexity_val, n_iter = 1000)
## データに適用
doc_enc = np.array(list(tSNE_df['enc']))
doc_tSNE_3d_varied = tSNE_3d_varied.fit_transform(doc_enc)
T = zip(doc_tSNE_3d_varied[:,0], doc_tSNE_3d_varied[:,1], doc_tSNE_3d_varied[:,2],
tSNE_df['language']) # zip(..)が必要
df = pd.DataFrame(T, columns = ['D1', 'D2', 'D3', 'language'])
## 作図
fig = go.Figure()
for lang in np.unique(df['language']):
part = df[df['language'] == lang]
fig.add_trace(
go.Scatter3d(
x = part['D1'], y = part['D2'], z = part['D3'],
name = lang, mode = 'markers', marker = dict(size = 6),
showlegend = True
)
)
title_val = f"tSNE 3D map (ppl: {perplexity_val}) of '{doc_attr}'s encoded\n by LDA ({n_topics} topics, {term_type})"
fig.update_layout(title = dict(text = title_val),
autosize = False, width = 600, height = 600,)
fig.show()
In [51]:
## 階層クラスタリングのための事例のサンプリング
hc_sampling_rate = 0.1 # 大きくし過ぎると図が見にくい
df_size = len(tSNE_df)
hc_sample_n = round(df_size * hc_sampling_rate)
hc_df = tSNE_df.sample(hc_sample_n)
##
print(f"{hc_sample_n} rows are sampled")
hc_df['language'].value_counts()
292 rows are sampled
Out[51]:
language french 41 greek 37 turkish 36 arabic 33 russian 31 japanese 28 hungarian 27 german 23 english 21 chinese 15 Name: count, dtype: int64
In [52]:
## 日本語表示のための設定
#import matplotlib.pyplot as plt
#plt.rcParams["font.family"] = "Hiragino Sans" # Windows は別のフォント名を指定する必要がある
#plt.rcParams["font.family"] = "Lucida Sans Unicode"
In [53]:
## doc 階層クラスタリングの実行
import numpy as np
import plotly
import matplotlib.pyplot as plt
## 次の設定は arabic が文字化けする
#plt.rcParams["font.family"] = "Hiragino Sans" # Windows は別のフォント名を指定する必要がある
#plt.rcParams["font.family"] = "Lucida Sans Unicode"
from scipy.cluster.hierarchy import dendrogram, linkage
## 距離行列の生成
Enc = list(hc_df['enc'])
linkage = linkage(Enc, method = 'ward', metric = 'euclidean')
## 描画サイズの指定
plt.figure(figsize = (5, round(len(hc_df) * 0.15))) # This needs to be run here, before dendrogram construction.
## 事例ラベルの生成
label_vals = [ x[:max_doc_size] for x in list(hc_df[doc_type]) ] # truncate doc keys
## 樹状分岐図の作成
dendrogram(linkage, orientation = 'left', labels = label_vals, leaf_font_size = 7)
## 描画
plt.title(f"Hierarchical clustering of (sampled) {len(hc_df)} (= {100 * hc_sampling_rate}%) {doc_attr}s as docs\n \
encoded via LDA ({n_topics} topics) with {term_type} as terms")
## ラベルに language に対応する色を付ける
lang_colors = { lang_name : i for i, lang_name in enumerate(np.unique(hc_df['language'])) }
ax = plt.gca()
for ticker in ax.get_ymajorticklabels():
form = ticker.get_text()
row = hc_df.loc[hc_df[doc_type] == form]
#lang = row['language']
lang = row['language'].to_string().split()[-1] # trick
try:
lang_id = lang_colors[lang]
except (TypeError, KeyError):
print(f"color encoding error at: {lang}")
#
ticker.set_color(plotly.colors.qualitative.Plotly[lang_id]) # id の基数調整
#
plt.show()
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 25105 (\N{CJK UNIFIED IDEOGRAPH-6211}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 20854 (\N{CJK UNIFIED IDEOGRAPH-5176}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 20182 (\N{CJK UNIFIED IDEOGRAPH-4ED6}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 21516 (\N{CJK UNIFIED IDEOGRAPH-540C}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 25511 (\N{CJK UNIFIED IDEOGRAPH-63A7}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 21046 (\N{CJK UNIFIED IDEOGRAPH-5236}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 37504 (\N{CJK UNIFIED IDEOGRAPH-9280}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 34892 (\N{CJK UNIFIED IDEOGRAPH-884C}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 20572 (\N{CJK UNIFIED IDEOGRAPH-505C}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 27490 (\N{CJK UNIFIED IDEOGRAPH-6B62}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 29366 (\N{CJK UNIFIED IDEOGRAPH-72B6}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 24907 (\N{CJK UNIFIED IDEOGRAPH-614B}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 31435 (\N{CJK UNIFIED IDEOGRAPH-7ACB}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12387 (\N{HIRAGANA LETTER SMALL TU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12390 (\N{HIRAGANA LETTER TE}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12356 (\N{HIRAGANA LETTER I}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12383 (\N{HIRAGANA LETTER TA}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12473 (\N{KATAKANA LETTER SU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12488 (\N{KATAKANA LETTER TO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12524 (\N{KATAKANA LETTER RE}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12483 (\N{KATAKANA LETTER SMALL TU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12481 (\N{KATAKANA LETTER TI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 30495 (\N{CJK UNIFIED IDEOGRAPH-771F}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12398 (\N{HIRAGANA LETTER NO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 20197 (\N{CJK UNIFIED IDEOGRAPH-4EE5}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 21069 (\N{CJK UNIFIED IDEOGRAPH-524D}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12525 (\N{KATAKANA LETTER RO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12540 (\N{KATAKANA-HIRAGANA PROLONGED SOUND MARK}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12523 (\N{KATAKANA LETTER RU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 27604 (\N{CJK UNIFIED IDEOGRAPH-6BD4}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 36611 (\N{CJK UNIFIED IDEOGRAPH-8F03}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12377 (\N{HIRAGANA LETTER SU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12427 (\N{HIRAGANA LETTER RU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 21517 (\N{CJK UNIFIED IDEOGRAPH-540D}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 35422 (\N{CJK UNIFIED IDEOGRAPH-8A5E}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 31532 (\N{CJK UNIFIED IDEOGRAPH-7B2C}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 19968 (\N{CJK UNIFIED IDEOGRAPH-4E00}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 32076 (\N{CJK UNIFIED IDEOGRAPH-7D4C}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 39443 (\N{CJK UNIFIED IDEOGRAPH-9A13}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12371 (\N{HIRAGANA LETTER KO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12366 (\N{HIRAGANA LETTER GI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12426 (\N{HIRAGANA LETTER RI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 21326 (\N{CJK UNIFIED IDEOGRAPH-534E}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 20102 (\N{CJK UNIFIED IDEOGRAPH-4E86}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 20154 (\N{CJK UNIFIED IDEOGRAPH-4EBA}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 29609 (\N{CJK UNIFIED IDEOGRAPH-73A9}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12381 (\N{HIRAGANA LETTER SO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12424 (\N{HIRAGANA LETTER YO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12358 (\N{HIRAGANA LETTER U}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12394 (\N{HIRAGANA LETTER NA}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12510 (\N{KATAKANA LETTER MA}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12471 (\N{KATAKANA LETTER SI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12531 (\N{KATAKANA LETTER N}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12367 (\N{HIRAGANA LETTER KU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12373 (\N{HIRAGANA LETTER SA}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12435 (\N{HIRAGANA LETTER N}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12456 (\N{KATAKANA LETTER E}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12493 (\N{KATAKANA LETTER NE}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12462 (\N{KATAKANA LETTER GI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 38263 (\N{CJK UNIFIED IDEOGRAPH-9577}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 24444 (\N{CJK UNIFIED IDEOGRAPH-5F7C}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12395 (\N{HIRAGANA LETTER NI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 25104 (\N{CJK UNIFIED IDEOGRAPH-6210}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 21442 (\N{CJK UNIFIED IDEOGRAPH-53C2}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 21152 (\N{CJK UNIFIED IDEOGRAPH-52A0}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 24863 (\N{CJK UNIFIED IDEOGRAPH-611F}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12376 (\N{HIRAGANA LETTER ZI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 26286 (\N{CJK UNIFIED IDEOGRAPH-66AE}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12425 (\N{HIRAGANA LETTER RA}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 30340 (\N{CJK UNIFIED IDEOGRAPH-7684}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 35498 (\N{CJK UNIFIED IDEOGRAPH-8AAA}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12467 (\N{KATAKANA LETTER KO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12500 (\N{KATAKANA LETTER PI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 35352 (\N{CJK UNIFIED IDEOGRAPH-8A18}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12414 (\N{HIRAGANA LETTER MA}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12384 (\N{HIRAGANA LETTER DA}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12506 (\N{KATAKANA LETTER PE}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12472 (\N{KATAKANA LETTER ZI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12522 (\N{KATAKANA LETTER RI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12503 (\N{KATAKANA LETTER PU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 35199 (\N{CJK UNIFIED IDEOGRAPH-897F}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 25918 (\N{CJK UNIFIED IDEOGRAPH-653E}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 19979 (\N{CJK UNIFIED IDEOGRAPH-4E0B}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 26080 (\N{CJK UNIFIED IDEOGRAPH-65E0}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 21475 (\N{CJK UNIFIED IDEOGRAPH-53E3}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 19981 (\N{CJK UNIFIED IDEOGRAPH-4E0D}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 26371 (\N{CJK UNIFIED IDEOGRAPH-6703}) missing from current font.
In [54]:
## tSNE の結果の可視化 (2D)
#import seaborn as sns
import numpy as np
import plotly
import plotly.express as pex
import matplotlib.pyplot as plt
from adjustText import adjust_text
## tSNE 事例の生成
perplexity_selected = 250
tSNE_3d = tSNE(n_components = 3, random_state = 0, perplexity = perplexity_selected, n_iter = 1000)
## データに適用
doc_enc = np.array(list(tSNE_df['enc']))
doc_tSNE_3d = tSNE_3d.fit_transform(doc_enc)
T = zip(doc_tSNE_3d[:,0], doc_tSNE_3d[:,1], doc_tSNE_3d[:,2],
tSNE_df['language']) # zip(..)が必要
df = pd.DataFrame(T, columns = ['D1', 'D2', 'D3', 'language'])
## 描画
## 次の設定は arabic が文字化けする
#plt.rcParams["font.family"] = "Hiragino Sans" # Windows は別のフォント名を指定する必要がある
#plt.rcParams["font.family"] = "Lucida Sans Unicode"
plt.figure(figsize = (5, 5))
plt.set_colors = pex.colors.qualitative.Plotly
for r in [ np.roll([0,1,2], -i) for i in range(0,3) ]:
if check:
print(r)
X, Y = df.iloc[:,r[0]], df.iloc[:,r[1]]
gmax = max(X.max(), Y.max())
gmin = min(X.min(), Y.min())
plt.xlim(gmin, gmax)
plt.ylim(gmin, gmax)
colormap = pex.colors.qualitative.Plotly
lang_list = list(np.unique(tSNE_df['language']))
cmapped = [ colormap[lang_list.index(lang)] for lang in df['language'] ]
scatter = plt.scatter(X, Y, s = 40, c = cmapped, edgecolors = 'w')
## 文字を表示する事例のサンプリング
lab_sampling_rate = 0.02
lab_sample_n = round(len(tSNE_df) * lab_sampling_rate)
sampled_keys = [ doc[:max_doc_size] for doc in random.sample(list(tSNE_df[doc_type]), lab_sample_n) ]
## labels の生成
texts = [ ]
for x, y, s in zip(X, Y, sampled_keys):
texts.append(plt.text(x, y, s, size = 9, color = 'blue'))
## label に repel を追加: adjustText package の導入が必要
adjust_text(texts, force_points = 0.2, force_text = 0.2,
expand_points = (1, 1), expand_text = (1, 1),
arrowprops = dict(arrowstyle = "-", color = 'black', lw = 0.5))
#
plt.title(f"tSNE (ppl: {perplexity_selected}) 2D map of {len(tSNE_df)} {doc_attr}s via LDA ({term_type}; {n_topics} topics)")
#plt.legend(np.unique(cmapped))
plt.show()
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 38291 (\N{CJK UNIFIED IDEOGRAPH-9593}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12395 (\N{HIRAGANA LETTER NI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 24120 (\N{CJK UNIFIED IDEOGRAPH-5E38}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12494 (\N{KATAKANA LETTER NO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12540 (\N{KATAKANA-HIRAGANA PROLONGED SOUND MARK}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12488 (\N{KATAKANA LETTER TO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12461 (\N{KATAKANA LETTER KI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12515 (\N{KATAKANA LETTER SMALL YA}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12503 (\N{KATAKANA LETTER PU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12486 (\N{KATAKANA LETTER TE}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12531 (\N{KATAKANA LETTER N}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 23569 (\N{CJK UNIFIED IDEOGRAPH-5C11}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12375 (\N{HIRAGANA LETTER SI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 26368 (\N{CJK UNIFIED IDEOGRAPH-6700}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 22909 (\N{CJK UNIFIED IDEOGRAPH-597D}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12525 (\N{KATAKANA LETTER RO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 22475 (\N{CJK UNIFIED IDEOGRAPH-57CB}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12417 (\N{HIRAGANA LETTER ME}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12427 (\N{HIRAGANA LETTER RU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 23567 (\N{CJK UNIFIED IDEOGRAPH-5C0F}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 38291 (\N{CJK UNIFIED IDEOGRAPH-9593}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12395 (\N{HIRAGANA LETTER NI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 24120 (\N{CJK UNIFIED IDEOGRAPH-5E38}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12494 (\N{KATAKANA LETTER NO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12540 (\N{KATAKANA-HIRAGANA PROLONGED SOUND MARK}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12488 (\N{KATAKANA LETTER TO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12461 (\N{KATAKANA LETTER KI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12515 (\N{KATAKANA LETTER SMALL YA}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12503 (\N{KATAKANA LETTER PU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12486 (\N{KATAKANA LETTER TE}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12531 (\N{KATAKANA LETTER N}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 23569 (\N{CJK UNIFIED IDEOGRAPH-5C11}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12375 (\N{HIRAGANA LETTER SI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 26368 (\N{CJK UNIFIED IDEOGRAPH-6700}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 22909 (\N{CJK UNIFIED IDEOGRAPH-597D}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12525 (\N{KATAKANA LETTER RO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 22475 (\N{CJK UNIFIED IDEOGRAPH-57CB}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12417 (\N{HIRAGANA LETTER ME}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12427 (\N{HIRAGANA LETTER RU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 23567 (\N{CJK UNIFIED IDEOGRAPH-5C0F}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12365 (\N{HIRAGANA LETTER KI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 21487 (\N{CJK UNIFIED IDEOGRAPH-53EF}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 33021 (\N{CJK UNIFIED IDEOGRAPH-80FD}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 20854 (\N{CJK UNIFIED IDEOGRAPH-5176}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 20182 (\N{CJK UNIFIED IDEOGRAPH-4ED6}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12475 (\N{KATAKANA LETTER SE}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12463 (\N{KATAKANA LETTER KU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12471 (\N{KATAKANA LETTER SI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12519 (\N{KATAKANA LETTER SMALL YO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12507 (\N{KATAKANA LETTER HO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12523 (\N{KATAKANA LETTER RU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12489 (\N{KATAKANA LETTER DO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 27794 (\N{CJK UNIFIED IDEOGRAPH-6C92}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 26377 (\N{CJK UNIFIED IDEOGRAPH-6709}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12524 (\N{KATAKANA LETTER RE}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12473 (\N{KATAKANA LETTER SU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12454 (\N{KATAKANA LETTER U}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 37027 (\N{CJK UNIFIED IDEOGRAPH-90A3}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 40636 (\N{CJK UNIFIED IDEOGRAPH-9EBC}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12365 (\N{HIRAGANA LETTER KI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 21487 (\N{CJK UNIFIED IDEOGRAPH-53EF}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 33021 (\N{CJK UNIFIED IDEOGRAPH-80FD}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 20854 (\N{CJK UNIFIED IDEOGRAPH-5176}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 20182 (\N{CJK UNIFIED IDEOGRAPH-4ED6}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12475 (\N{KATAKANA LETTER SE}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12463 (\N{KATAKANA LETTER KU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12471 (\N{KATAKANA LETTER SI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12519 (\N{KATAKANA LETTER SMALL YO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12507 (\N{KATAKANA LETTER HO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12523 (\N{KATAKANA LETTER RU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12489 (\N{KATAKANA LETTER DO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 27794 (\N{CJK UNIFIED IDEOGRAPH-6C92}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 26377 (\N{CJK UNIFIED IDEOGRAPH-6709}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12524 (\N{KATAKANA LETTER RE}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12473 (\N{KATAKANA LETTER SU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12454 (\N{KATAKANA LETTER U}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 37027 (\N{CJK UNIFIED IDEOGRAPH-90A3}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 40636 (\N{CJK UNIFIED IDEOGRAPH-9EBC}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 37325 (\N{CJK UNIFIED IDEOGRAPH-91CD}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 36215 (\N{CJK UNIFIED IDEOGRAPH-8D77}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12371 (\N{HIRAGANA LETTER KO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 20123 (\N{CJK UNIFIED IDEOGRAPH-4E9B}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 27503 (\N{CJK UNIFIED IDEOGRAPH-6B6F}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 29273 (\N{CJK UNIFIED IDEOGRAPH-7259}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 38283 (\N{CJK UNIFIED IDEOGRAPH-958B}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 30330 (\N{CJK UNIFIED IDEOGRAPH-767A}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12377 (\N{HIRAGANA LETTER SU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 30058 (\N{CJK UNIFIED IDEOGRAPH-756A}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 21495 (\N{CJK UNIFIED IDEOGRAPH-53F7}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12506 (\N{KATAKANA LETTER PE}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12472 (\N{KATAKANA LETTER ZI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 12474 (\N{KATAKANA LETTER ZU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 26080 (\N{CJK UNIFIED IDEOGRAPH-65E0}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 25140 (\N{CJK UNIFIED IDEOGRAPH-6234}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 22320 (\N{CJK UNIFIED IDEOGRAPH-5730}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/adjustText/__init__.py:564: UserWarning:
Glyph 38754 (\N{CJK UNIFIED IDEOGRAPH-9762}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 37325 (\N{CJK UNIFIED IDEOGRAPH-91CD}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 36215 (\N{CJK UNIFIED IDEOGRAPH-8D77}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12371 (\N{HIRAGANA LETTER KO}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 20123 (\N{CJK UNIFIED IDEOGRAPH-4E9B}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 27503 (\N{CJK UNIFIED IDEOGRAPH-6B6F}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 29273 (\N{CJK UNIFIED IDEOGRAPH-7259}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 38283 (\N{CJK UNIFIED IDEOGRAPH-958B}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 30330 (\N{CJK UNIFIED IDEOGRAPH-767A}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12377 (\N{HIRAGANA LETTER SU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 30058 (\N{CJK UNIFIED IDEOGRAPH-756A}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 21495 (\N{CJK UNIFIED IDEOGRAPH-53F7}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12506 (\N{KATAKANA LETTER PE}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12472 (\N{KATAKANA LETTER ZI}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 12474 (\N{KATAKANA LETTER ZU}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 26080 (\N{CJK UNIFIED IDEOGRAPH-65E0}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 25140 (\N{CJK UNIFIED IDEOGRAPH-6234}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 22320 (\N{CJK UNIFIED IDEOGRAPH-5730}) missing from current font.
/Volumes/K/opt/miniconda3/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning:
Glyph 38754 (\N{CJK UNIFIED IDEOGRAPH-9762}) missing from current font.
In [ ]: